#define preempt_stop
#define retint_kernel retint_restore_args
#endif
-
+
/*
* C code is not supposed to know about undefined top of stack. Every time
1:
.endm
-/*
+/*
* A newly forked process directly context switches into this.
*/
/* rdi: prev */
XEN_UNBLOCK_EVENTS(%r11)
RESTORE_ARGS 0,8,0
SWITCH_TO_USER ECF_IN_SYSCALL
-
+
/* Handle reschedules */
/* edx: work, edi: workmask */
sysret_careful:
#else
SAVE_ARGS
leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
-#endif
+#endif
#if 0 /* For Xen we don't need to do this */
testl $3,CS(%rdi)
je 1f
- swapgs
+ swapgs
#endif
1: addl $1,%gs:pda_irqcount # RED-PEN should check preempt count
movq %gs:pda_irqstackptr,%rax
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
jnz retint_careful
-retint_restore_args:
- RESTORE_ARGS 0,8,0
+retint_restore_args:
+ RESTORE_ARGS 0,8,0
testb $3,8(%rsp) # check CS
jnz user_mode
kernel_mode:
orb $3,1*8(%rsp)
- iretq
+ iretq
user_mode:
SWITCH_TO_USER 0
-
+
/* edi: workmask, edx: work */
retint_careful:
bt $TIF_NEED_RESCHED,%edx
/* sti */
pushq %rdi
call schedule
- popq %rdi
+ popq %rdi
XEN_GET_VCPU_INFO(%r11)
XEN_BLOCK_EVENTS(%r11)
GET_THREAD_INFO(%rcx)
CFI_REL_OFFSET rsp,(RSP-RDI)
CFI_REL_OFFSET rip,(RIP-RDI)
/* rdi slot contains rax, oldrax contains error code */
- cld
+ cld
subq $14*8,%rsp
CFI_ADJUST_CFA_OFFSET (14*8)
movq %rsi,13*8(%rsp)
andb $1,%al # %al == mask & ~saved_mask
jnz restore_all_enable_events # != 0 => reenable event delivery
XEN_UNLOCK_VCPU_INFO_SMP(%rsi)
-error_exit:
+error_exit:
RESTORE_REST
/* cli */
GET_THREAD_INFO(%rcx)
movl $_TIF_WORK_MASK,%edi
andl %edi,%edx
jnz retint_careful
- RESTORE_ARGS 0,8,0
+ RESTORE_ARGS 0,8,0
SWITCH_TO_USER 0
CFI_ENDPROC
je error_swapgs
jmp error_sti
#endif
-
+
ENTRY(hypervisor_callback)
zeroentry do_hypervisor_callback
.long 3b,8b; \
.long 4b,9b; \
.previous
-
+
.section __ex_table,"a"
.align 8
.quad gs_change,bad_gs
CFI_ENDPROC
#if 0
- /* runs on exception stack */
+ /* runs on exception stack */
ENTRY(nmi)
CFI_STARTPROC
pushq $-1
.code64
ENTRY(_start)
cld
- movq init_rsp(%rip),%rsp
+ movq init_rsp(%rip),%rsp
/* Copy the necessary stuff from xen_start_info structure. */
movq $xen_start_info_union,%rdi
movq $64,%rcx /* sizeof (union xen_start_info_union) / sizeof (long) */
pushq %rax
lretq
#endif
-
+
ENTRY(stext)
ENTRY(_stext)
.org 0x2000
ENTRY(init_level4_user_pgt)
.fill 512,8,0
-
+
/*
* This is used for vsyscall area mapping as we have a different
* level4 page table for user.
.quad 0x008ffa000000ffff /* __KERNEL_COMPAT32_CS */
.quad 0x00affa000000ffff /* __KERNEL_CS */
.quad 0x00cff2000000ffff /* __KERNEL_DS */
-
+
.quad 0x00cffa000000ffff /* __USER32_CS */
.quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
.quad 0x00affa000000ffff /* __USER_CS */
* IRET will check the segment types kkeil 2000/10/28
* Also sysret mandates a special GDT layout
*/
-
+
#if 0
.align L1_CACHE_BYTES
#endif
#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
static void flush_ldt(void *null)
{
- if (current->active_mm) {
- load_LDT(&current->active_mm->context);
- }
+ if (current->active_mm)
+ load_LDT(&current->active_mm->context);
}
#endif
* the same here.
*/
int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction)
+ int nents, int direction)
{
- int i;
-
- BUG_ON(direction == DMA_NONE);
- for (i = 0; i < nents; i++ ) {
- struct scatterlist *s = &sg[i];
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
- s->dma_length = s->length;
- }
- return nents;
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+ for (i = 0; i < nents; i++ ) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+ s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ s->dma_length = s->length;
+ }
+ return nents;
}
EXPORT_SYMBOL(dma_map_sg);
* pci_unmap_single() above.
*/
void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, int dir)
+ int nents, int dir)
{
- int i;
- for (i = 0; i < nents; i++) {
- struct scatterlist *s = &sg[i];
- BUG_ON(s->page == NULL);
- BUG_ON(s->dma_address == 0);
- dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
- }
+ int i;
+ for (i = 0; i < nents; i++) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(s->page == NULL);
+ BUG_ON(s->dma_address == 0);
+ dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
+ }
}
struct dma_coherent_mem {
if (cpu_isset(cpu, cpu_idle_map))
cpu_clear(cpu, cpu_idle_map);
rmb();
-
+
__IRQ_STAT(cpu,idle_timestamp) = jiffies;
xen_idle();
}
struct tss_struct *tss = &per_cpu(init_tss, cpu);
physdev_op_t iopl_op, iobmp_op;
multicall_entry_t _mcl[8], *mcl = _mcl;
-
+
/*
* This is basically '__unlazy_fpu', except that we queue a
* multicall to indicate FPU task switch, rather than
if (doit) {
/* set the selector to 0 to not confuse
__switch_to */
- asm volatile("movl %0,%%fs" :: "r" (0));
+ asm volatile("movl %0,%%fs" :: "r" (0));
ret = HYPERVISOR_set_segment_base(SEGBASE_FS, addr);
}
ptregs.ss &= 0xffff;
elf_core_copy_regs(regs, &ptregs);
-
+
boot_option_idle_override = 1;
return 1;
}
struct cpuinfo_x86 boot_cpu_data;
-
unsigned long mmu_cr4_features;
EXPORT_SYMBOL_GPL(mmu_cr4_features);
disable_apic = 1;
if (!memcmp(from, "noapic", 6))
- skip_ioapic_setp = 1;
+ skip_ioapic_setup = 1;
if (!memcmp(from, "apic", 4)) {
skip_ioapic_setup = 0;
#ifndef CONFIG_DISCONTIGMEM
static void __init contig_initmem_init(void)
{
- unsigned long bootmap_size, bootmap;
+ unsigned long bootmap_size, bootmap;
/*
* partially used pages are not usable - thus
unsigned long low_mem_size;
int i, j;
physdev_op_t op;
-
+
#if 0
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
#else
}
}
#endif
- paging_init();
+ paging_init();
/* Make sure we have a large enough P->M table. */
if (end_pfn > xen_start_info.nr_pages) {
}
#if 0
- check_ioapic();
+ check_ioapic();
#endif
#ifdef CONFIG_ACPI_BOOT
get_smp_config();
init_apic_mappings();
#endif
-
+
/* XXX Disable irqdebug until we have a way to avoid interrupt
* conflicts. */
/* noirqdebug_setup(""); */
pci_mem_start = low_mem_size;
#ifdef CONFIG_GART_IOMMU
- iommu_hole_init();
+ iommu_hole_init();
#endif
op.cmd = PHYSDEVOP_SET_IOPL;
*/
int __init nonx_setup(char *str)
{
- if (!strcmp(str, "on")) {
+ if (!strncmp(str, "on", 2)) {
__supported_pte_mask |= _PAGE_NX;
do_not_nx = 0;
} else if (!strncmp(str, "off", 3)) {
{
pgd_t *old_level4 = (pgd_t *)xen_start_info.pt_base;
struct x8664_pda *pda = &cpu_pda[cpu];
-
+
/* Setup up data that may be needed in __get_free_pages early */
asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL,
(unsigned long)(cpu_pda + cpu));
-
- pda->me = pda;
- pda->cpunumber = cpu;
- pda->irqcount = -1;
- pda->kernelstack =
- (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
- pda->active_mm = &init_mm;
+
+ pda->me = pda;
+ pda->cpunumber = cpu;
+ pda->irqcount = -1;
+ pda->kernelstack =
+ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
+ pda->active_mm = &init_mm;
pda->mmu_state = 0;
pda->kernel_mode = 1;
printk("Initializing CPU#%d\n", cpu);
#if 0
- clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
#endif
/*
* Initialize the per-CPU GDT with the boot GDT,
#if 0
memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
-
+
#endif
memcpy(me->thread.tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
GDT_ENTRY_TLS_ENTRIES * 8);
- /*
+ /*
* Delete NT
*/
#ifdef CONFIG_IA32_EMULATION
set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
-
+
/*
* Should be a barrier for any external CPU state.
*/
void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
-{
+{
unsigned long address = __fix_to_virt(idx);
if (idx >= __end_of_fixed_addresses) {
pte_t *pte = pte_offset_kernel(pmd, addr);
return (pte->pte >> PAGE_SHIFT);
-}
+}
/*
* We start using from start_pfn
*/
static __init void *alloc_low_page(unsigned long *phys)
-{
+{
unsigned long pfn = table_end++;
*phys = (pfn << PAGE_SHIFT);
memset((void *) ((pfn << PAGE_SHIFT) + __START_KERNEL_map), 0, PAGE_SIZE);
return (void *)((pfn << PAGE_SHIFT) + __START_KERNEL_map);
-}
+}
#define PTE_SIZE PAGE_SIZE
pmd_t *pmd;
paddr = address + i*PUD_SIZE;
- if (paddr >= end) {
+ if (paddr >= end) {
for (; i < PTRS_PER_PUD; i++, pud++)
set_pud(pud, __pud(0));
break;
- }
+ }
pmd = alloc_low_page(&pmd_phys);
make_page_readonly(pmd);
unsigned long pte_phys;
pte_t *pte, *pte_save;
- if (paddr >= end) {
+ if (paddr >= end) {
for (; j < PTRS_PER_PMD; j++, pmd++)
set_pmd(pmd, __pmd(0));
break;
- }
+ }
pte = alloc_low_page(&pte_phys);
pte_save = pte;
for (k = 0; k < PTRS_PER_PTE; pte++, k++, paddr += PTE_SIZE) {
{
unsigned long puds, pmds, ptes;
- puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
- pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
tables_reserved = round_up(puds*8, PAGE_SIZE) + round_up(pmds * 8, PAGE_SIZE)
Dprintk("init_memory_mapping\n");
- find_early_table_space(end);
+ find_early_table_space(end);
- start = (unsigned long)__va(start);
- end = (unsigned long)__va(end);
+ start = (unsigned long)__va(start);
+ end = (unsigned long)__va(end);
- for (; start < end; start = next) {
+ for (; start < end; start = next) {
unsigned long pud_phys;
pud_t *pud = alloc_low_page(&pud_phys);
make_page_readonly(pud);
next = start + PGDIR_SIZE;
if (next > end)
next = end;
- phys_pud_init(pud, __pa(start), __pa(next));
- set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
- }
+ phys_pud_init(pud, __pa(start), __pa(next));
+ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
+ }
early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
table_start<<PAGE_SHIFT,
/* unsigned int max_dma; */
/* max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; */
/* if (end_pfn < max_dma) */
- zones_size[ZONE_DMA] = end_pfn;
+ zones_size[ZONE_DMA] = end_pfn;
#if 0
else {
zones_size[ZONE_DMA] = max_dma;
zones_size[ZONE_NORMAL] = end_pfn - max_dma;
- }
+ }
#endif
- free_area_init(zones_size);
+ free_area_init(zones_size);
}
__set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info,
pud = pud_offset(pgd, address);
if (pud_none(*pud))
continue;
- pmd = pmd_offset(pud, address);
+ pmd = pmd_offset(pud, address);
if (!pmd || pmd_none(*pmd))
continue;
if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
* the WP-bit has been tested.
*/
#ifndef CONFIG_SMP
- zap_low_mappings();
+ zap_low_mappings();
#endif
}
int kern_addr_valid(unsigned long addr)
{
- unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
+ unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
if (above != 0 && above != -1UL)
return 0;
- pgd = pgd_offset_k(addr);
+ pgd = pgd_offset_k(addr);
if (pgd_none(*pgd))
return 0;
if (pud_none(*pud))
return 0;
- pmd = pmd_offset(pud, addr);
+ pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return 0;
if (pmd_large(*pmd))
return pfn_valid(pmd_pfn(*pmd));
- pte = pte_offset_kernel(pmd, addr);
+ pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
return 0;
return pfn_valid(pte_pfn(*pte));
*/
if (is_local_lowmem(phys_addr)) {
char *t_addr, *t_end;
- struct page *page;
+ struct page *page;
t_addr = bus_to_virt(phys_addr);
t_end = t_addr + (size - 1);